Design X and data generation

Отладочные графики генерации данных для шума

# Debug plots for the data-generation process under zero noise.
train <- GetSimulationData(10000, scenario = "shvechikov.2", sd = 0.0)
curve(Shvechikov.1.fopt, from = 0, to = 100)

curve(Shvechikov.2.fopt, from = 0, to = 1)

# Shared point style for the scatter plots below.
point.col <- rgb(0, 0.5, 0.5, 0.03)

# Optimal treatment as a function of the covariate.
with(train, plot(covariates, optimal.treatment,
                 pch = 19, cex = 1.2, col = point.col))

# Deviation of the sampled treatment from the optimal one.
with(train, plot(covariates, treatment - optimal.treatment,
                 pch = 19, cex = 1.2, col = point.col))

# Squared deviation of the sampled treatment from the optimal one.
with(train, plot(covariates, abs(treatment - optimal.treatment)^2,
                 pch = 19, cex = 1.2, col = point.col))

# Q-function values at the sampled (covariate, treatment) pairs.
with(train, plot(covariates,
                 GetQFunctionValues(covariates, treatment, optimal.treatment),
                 pch = 19, cex = 1.2, col = point.col))

# Smaller sample for the binned ggplot diagnostics below.
train <- GetSimulationData(1000, scenario = "shvechikov.2", sd = 0.0)
granularity <- 4
# Quantile levels 0.25, 0.5, 0.75, 1 used to bin each variable.
levels <- seq_len(granularity) / granularity
d <- with(train, data.frame(
  reward = reward,
  treatment = treatment,
  covariates = covariates,
  treat.bins = cut(treatment, c(0, quantile(treatment, levels)),
                   include.lowest = TRUE),
  rew.bins = cut(reward, c(0, quantile(reward, levels)),
                 include.lowest = TRUE),
  cov.bins = cut(covariates, c(0, quantile(covariates, levels)),
                 include.lowest = TRUE)
))

# Covariates vs treatment, coloured by reward, faceted by reward bins.
ggplot(d, aes(covariates, treatment, col = reward)) +
  geom_point() + geom_smooth() + facet_wrap(~rew.bins) +
  scale_color_gradient(low = "red", high = "green")

# Treatment vs reward, coloured by covariates, faceted by covariate bins.
ggplot(d, aes(treatment, reward, col = covariates)) +
  geom_point() + geom_smooth() + facet_wrap(~cov.bins)

# Covariates vs reward, faceted by treatment bins.
ggplot(d, aes(covariates, reward, col = reward)) +
  geom_point() + geom_smooth() + facet_wrap(~treat.bins)

# Same as the first plot, with a 2d density overlay on top.
ggplot(d, aes(covariates, treatment, col = reward)) +
  geom_point() + geom_smooth() + facet_wrap(~rew.bins) +
  geom_density2d(col = "black")

Визуализация облака точек из обучающей выборки:

# Point-cloud visualisation of a small noiseless training sample,
# rotated around the vertical axis in 5-degree steps.
# NOTE: reuses the quantile `levels` defined for the previous sample.
train <- GetSimulationData(200, scenario = "shvechikov.1", sd = 0)
d <- with(train, data.frame(
  reward = reward,
  treatment = treatment,
  covariates = covariates,
  treat.bins = cut(treatment, c(0, quantile(treatment, levels)),
                   include.lowest = TRUE),
  rew.bins = cut(reward, c(0, quantile(reward, levels)),
                 include.lowest = TRUE),
  cov.bins = cut(covariates, c(0, quantile(covariates, levels)),
                 include.lowest = TRUE)
))
for (i in seq(-180, 180, 5)) {
  # x - treatment;  y - covariates; z - reward
  print(cloud(reward ~ treatment * covariates, data = d,
              screen = list(z = i, x = -60, y = 0)))
}

All possible models in the tgp package

Some helper functions:

# For every covariate value C, pick the treatment A that maximises the
# pessimistic estimate (lower confidence bound) mean - s * sd of the
# posterior predictive reward over the tgp prediction locations.
#
# gp_model: fitted tgp model exposing ZZ.mean, ZZ.s2 and the prediction
#           grid XX (assumed to have columns A and C -- TODO confirm).
# s:        number of posterior standard deviations to subtract.
# Returns a data.table keyed by C with columns est_A and est_LB.
GetBestPredictions <- function(gp_model, s = 1) {
  post.mean <- gp_model$ZZ.mean
  post.sd <- sqrt(gp_model$ZZ.s2)
  preds <- data.table(gp_model$XX)
  preds[, LB := post.mean - s * post.sd]
  preds[, .(est_A = A[which.max(LB)], est_LB = max(LB)), keyby = C]
}

# Average true Q-function value of the estimated policy (est_A at each C)
# under the test scenario's reward model.
GetValueOfPredictedA <- function(best.estim.dt, test.obj) {
  best.estim.dt[, mean(test.obj$GetQFunctionValues(C, est_A))]
}

# Grid-search the pessimism multiplier s and return the best policy.
#
# model:    fitted tgp model, forwarded to GetBestPredictions().
# test.obj: test scenario providing GetQFunctionValues() to score a policy
#           (defaults to a global `test` object -- NOTE(review): relies on
#           callers having one in scope; confirm).
# seq.of.s: candidate values of s; defaults to 100 points on [0, 20].
#
# Side effect: plots the value curve Q(s) over the candidate grid.
# Returns list(m_pred = best policy table, best.s, best.Q).
GetTunedBestPredictions <- function(model, test.obj = test, seq.of.s = NULL) {
  if (is.null(seq.of.s)) {
    seq.of.s <- seq(0, 20, length.out = 100)
  }
  # vapply instead of sapply: guarantees a numeric vector whatever the
  # grid length, so which.max()/max() below stay well-defined.
  seq.of.Q.vals <- vapply(seq.of.s, function(s) {
    result.dt <- GetBestPredictions(model, s)
    GetValueOfPredictedA(result.dt, test.obj)
  }, numeric(1))
  plot(seq.of.s, seq.of.Q.vals, type = "l")
  best.s <- seq.of.s[which.max(seq.of.Q.vals)]
  best.result.dt <- GetBestPredictions(model, s = best.s)
  list(m_pred = best.result.dt, best.s = best.s, best.Q = max(seq.of.Q.vals))
}

# Plot the decision surface mean - s * sd for each fitted model, as a
# levelplot and a wireframe side by side.
#
# models: named list of fitted tgp models (names used as plot titles).
# s:      pessimism multiplier. BUG FIX: `s` used to be read from the
#         global environment; it is now an explicit parameter with a
#         default of 1, matching GetBestPredictions().
PlotDecisionSurface <- function(models, s = 1) {
  for (m_name in names(models)) {
    m <- models[[m_name]]
    # Reshape the LCB over the (A, C) prediction grid into a matrix with
    # one row per unique treatment value A.
    surf <- matrix(m$ZZ.mean - s * sqrt(m$ZZ.s2),
                   nrow = length(unique(m$XX$A)))
    plt1 <- levelplot(surf, col.regions = gray(0:100 / 100),
                      xlab = "C", ylab = "A")
    plt2 <- wireframe(surf, xlab = "C", ylab = "A", zlab = "decision surf",
                      main = m_name,
                      par.settings = list(axis.line = list(col = "transparent")))
    grid.arrange(plt1, plt2, ncol = 2)
  }
}

# Fit every tgp model family on a fresh simulated training set, tune each
# model's pessimism multiplier s on a test set, and draw comparison plots
# of the resulting treatment policies plus per-model diagnostics and
# decision surfaces.
#
# noise.sd:  standard deviation of the observation noise in the simulation.
# n_samples: number of training / test samples (also sizes the grids).
FitAndPlotAllModels <- function(noise.sd = 0.1, n_samples = 100)  {
  train <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = noise.sd)
  test <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = noise.sd)
  A.grid <- seq(0, 1, length.out = min(n_samples, 100))
  X <- with(train, data.frame(C = covariates, A = treatment))
  Y <- train$reward
  # NOTE(review): ZZ gets default names Var1/Var2; downstream code indexes
  # XX by C and A, which assumes tgp renames XX's columns after X -- confirm.
  ZZ <- expand.grid(seq(0, 1, length.out = n_samples), A.grid)

  # Fit every model family by name on the same data.
  func_names <- c("blm", "btlm", "bcart", "bgp", "bgpllm", "btgp", "btgpllm")
  models <- lapply(func_names, function(f_name) do.call(f_name, list(X, Y, ZZ)))
  names(models) <- func_names

  predictions <- list()
  best.s <- numeric(0)
  best.Q <- numeric(0)
  for (m in models) {
    # Spell the argument out in full (was `test=`, which only resolved to
    # `test.obj` via partial matching).
    tuned.res <- GetTunedBestPredictions(m, test.obj = test)
    if (length(predictions) == 0) {
      # The covariate grid is identical for every model (results are keyed
      # by C), so store it once instead of overwriting it each iteration.
      predictions[[1]] <- tuned.res$m_pred$C
    }
    predictions[[length(predictions) + 1]] <- tuned.res$m_pred$est_A
    best.s <- c(best.s, tuned.res$best.s)
    best.Q <- c(best.Q, tuned.res$best.Q)
  }
  dt <- as.data.table(predictions)
  # Label each column with its model name plus the tuned s and achieved Q.
  formatted.names <- paste0(func_names, ", s = ", round(best.s, 2),
                            ", Q = ", round(best.Q, 2))
  names(dt) <- c("C", formatted.names)
  dt.melted <- melt(dt, id.vars = "C", variable.name = "Algo",
                    value.name = "est_A")
  gg <- ggplot(dt.melted, aes(C, est_A)) + geom_point() + geom_smooth() +
    facet_wrap(~ Algo, nrow = 2)
  print(gg)
  gg <- ggplot(dt.melted, aes(C, est_A, col = Algo)) + geom_smooth()
  print(gg)
  dt.melted[, optimal_A := test$GetOptimalTreatment(C)]
  # Fixed: colour set outside aes() so the optimal-treatment line is drawn
  # in literal red instead of being mapped to a spurious legend entry "red".
  gg <- ggplot(dt.melted, aes(x = C, y = est_A)) + geom_point() + geom_smooth() +
    geom_line(aes(C, optimal_A), col = "red") + facet_wrap(~ Algo, nrow = 2)
  print(gg)

  # Per-model diagnostic plots, in fitting order.
  for (m in models) {
    plot(m)
  }

  PlotDecisionSurface(models)
}

No noise, 100 observations, testing on uniform grid

If there is no noise in the data – it is best to predict using $f_* - 0$. And it is very cool that the model learns an almost perfect solution even in regions where there are no training points.

# Experiment: no noise, 100 observations, predictions on a uniform grid.
n_samples <- 100
sd <- 0
train <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = sd)
test <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = sd)
A.grid <- seq(0, 1, length.out = n_samples)
X <- with(train, data.frame(C = covariates, A = treatment))
Y <- train$reward
ZZ <- expand.grid(seq(0, 1, length.out = n_samples), A.grid)

m <- bgp(X, Y, XX = ZZ)
## 
## burn in:
## r=1000 d=[0.154177 1.78213]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.150504 1.52539]; mh=1 n=100
## r=2000 d=[0.171119 1.7686]; mh=1 n=100
## r=3000 d=[0.159719 1.59994]; mh=1 n=100
plot(m)

pred <- GetBestPredictions(m)

# Estimated policy (points/smooth) vs the true optimal treatment (red);
# green vertical lines mark the covariate range seen during training.
ggplot(pred, aes(C, est_A)) + geom_smooth() + geom_point() +
  geom_line(aes(C, test$GetOptimalTreatment(C)), col = "red") +
  geom_vline(xintercept = max(train$covariates), col = "green") +
  geom_vline(xintercept = min(train$covariates), col = "green")

  # ggtitle(paste("best s = ", p$best.s))

# Tune s on a log-spaced grid and re-plot the resulting policy.
seq.of.s <- exp(seq(-3, 3, length.out = 100))
p <- GetTunedBestPredictions(m, test, seq.of.s = seq.of.s)
ggplot(p$m_pred, aes(C, est_A)) + geom_smooth() + geom_point() +
  geom_line(aes(C, test$GetOptimalTreatment(C)), col = "red") +
  geom_vline(xintercept = max(train$covariates), col = "green") +
  geom_vline(xintercept = min(train$covariates), col = "green") +
  ggtitle(paste("best s = ", p$best.s))

So let us try to plot our decisions with different values of s, and see that

  • we can increase s as much as we want and obtain a degenerate solution.
# Sweep s over a log-spaced grid and plot the induced policy for each
# value; large s collapses the policy toward a degenerate solution.
# (The loop variable is deliberately named `s`: later code reads it.)
seq.of.s <- exp(seq(-1, 3, length.out = 40))
for (s in seq.of.s) {
  m_pred <- GetBestPredictions(m, s)
  gg <- ggplot(m_pred, aes(C, est_A)) + geom_smooth() + geom_point() +
    geom_line(aes(C, test$GetOptimalTreatment(C))) +
    geom_vline(xintercept = max(train$covariates), col = "green") +
    geom_vline(xintercept = min(train$covariates), col = "green") +
    ggtitle(paste("s = ", s))
  print(gg)
}

TODO:

  • why does the minimum of the fitted best-treatment policy not match the minimum of the true optimal treatment?

If we add very strong noise, we get a thoroughly bad fitted surface:

# Experiment: strong noise (sd = 0.15), 100 observations.
n_samples <- 100
sd <- 0.15
train <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = sd)
test <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = sd)
A.grid <- seq(0, 1, length.out = n_samples)
X <- with(train, data.frame(C = covariates, A = treatment))
Y <- train$reward
ZZ <- expand.grid(seq(0, 1, length.out = n_samples), A.grid)

m <- bgp(X, Y, XX = ZZ)
## 
## burn in:
## r=1000 d=[0.820254 0.560019]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.79397 0.697084]; mh=1 n=100
## r=2000 d=[0.425448 0.732678]; mh=1 n=100
## r=3000 d=[0.839584 0.466754]; mh=1 n=100
plot(m)

p <- GetTunedBestPredictions(m, test)
p$best.s
## [1] 18.38384
# Tuned policy vs the true optimal treatment (red); green lines mark the
# training covariate range.
ggplot(p$m_pred, aes(C, est_A)) + geom_smooth() + geom_point() +
  geom_line(aes(C, test$GetOptimalTreatment(C)), col = "red") +
  geom_vline(xintercept = max(train$covariates), col = "green") +
  geom_vline(xintercept = min(train$covariates), col = "green") +
  ggtitle(paste("Best s= ", p$best.s))

But even with this strong noise, we can compensate by drawing more samples:

# Experiment: strong noise (sd = 0.15) but three times as many samples.
n_samples <- 300
sd <- 0.15
train <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = sd)
test <- GetSimulationData(n_samples, scenario = "shvechikov.2", sd = sd)
# Cap the treatment grid at 100 points to bound the prediction grid size.
A.grid <- seq(0, 1, length.out = min(n_samples, 100))
X <- with(train, data.frame(C = covariates, A = treatment))
Y <- train$reward
ZZ <- expand.grid(seq(0, 1, length.out = n_samples), A.grid)

m <- bgp(X, Y, XX = ZZ)
## 
## burn in:
## r=1000 d=[0.0489868 0.687975]; n=300
## 
## Sampling @ nn=30000 pred locs:
## r=1000 d=[0.183456 1.01734]; mh=1 n=300
## r=2000 d=[0.099061 0.876473]; mh=1 n=300
## r=3000 d=[0.0927358 0.985005]; mh=1 n=300
plot(m)

p <- GetTunedBestPredictions(m, test)
p$best.s
## [1] 0
# Tuned policy vs the true optimal treatment (red); green lines mark the
# training covariate range.
ggplot(p$m_pred, aes(C, est_A)) + geom_smooth() + geom_point() +
  geom_line(aes(C, test$GetOptimalTreatment(C)), col = "red") +
  geom_vline(xintercept = max(train$covariates), col = "green") +
  geom_vline(xintercept = min(train$covariates), col = "green") +
  ggtitle(paste("Best s= ", p$best.s))

Experiment with all of the models - no noise

# Run the full model comparison with zero observation noise
# (the recorded tgp sampler output follows below).
FitAndPlotAllModels(noise.sd = 0)
## 
## burn in:
## r=1000 d=[0]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0]; mh=1 n=100
## r=2000 d=[0]; mh=1 n=100
## r=3000 d=[0]; mh=1 n=100
## 
## 
## burn in:
## **GROW** @depth 0: [2,0.616162], n=(81,19)
## **GROW** @depth 1: [2,0.282828], n=(35,46)
## **PRUNE** @depth 1: [2,0.282828]
## **GROW** @depth 1: [1,0.414141], n=(51,30)
## **GROW** @depth 2: [2,0.30303], n=(24,27)
## r=1000 d=[0] [0] [0] [0]; n=(24,25,30,21)
## **GROW** @depth 1: [2,0.494949], n=(27,14)
## r=2000 d=[0] [0] [0] [0] [0]; n=(24,20,12,30,14)
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0] [0] [0] [0] [0]; mh=4 n=(25,21,11,29,14)
## r=2000 d=[0] [0] [0] [0] [0]; mh=4 n=(24,23,12,29,12)
## r=3000 d=[0] [0] [0] [0] [0]; mh=4 n=(24,21,12,31,12)
## **GROW** @depth 3: [2,0.20202], n=(13,11)
## r=4000 d=[0] [0] [0] [0] [0] [0]; mh=5 n=(13,12,20,12,31,12)
## **GROW** @depth 2: [1,0.545455], n=(16,16)
## **PRUNE** @depth 2: [1,0.545455]
## **PRUNE** @depth 2: [2,0.20202]
## r=5000 d=[0] [0] [0] [0] [0]; mh=4 n=(24,20,12,32,12)
## Grow: 1.882%, Prune: 0.9434%, Change: 70.39%, Swap: 31.48%
## 
## 
## burn in:
## **GROW** @depth 0: [2,0.494949], n=(71,29)
## **GROW** @depth 1: [2,0.222222], n=(24,45)
## r=1000 d=[0] [0] [0]; n=(24,46,30)
## r=2000 d=[0] [0] [0]; n=(24,47,29)
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0] [0] [0]; mh=3 n=(23,49,28)
## r=2000 d=[0] [0] [0]; mh=3 n=(22,49,29)
## r=3000 d=[0] [0] [0]; mh=3 n=(24,48,28)
## **GROW** @depth 1: [2,0.656566], n=(15,14)
## **PRUNE** @depth 1: [2,0.656566]
## r=4000 d=[0] [0] [0]; mh=3 n=(24,46,30)
## r=5000 d=[0] [0] [0]; mh=3 n=(22,50,28)
## Grow: 0.8902%, Prune: 0.304%, Change: 70.6%, Swap: 100%
## 
## 
## burn in:
## r=1000 d=[0.166358 2.27645]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.152641 2.03569]; mh=1 n=100
## r=2000 d=[0.179641 1.57258]; mh=1 n=100
## r=3000 d=[0.155745 1.86463]; mh=1 n=100
## 
## 
## burn in:
## r=1000 d=[0.162615 2.08801]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.130784 1.52385]; mh=1 n=100
## r=2000 d=[0.152022 1.82825]; mh=1 n=100
## r=3000 d=[0.154289 2.10404]; mh=1 n=100
## 
## 
## burn in:
## r=1000 d=[0.141035 1.67728]; n=100
## r=2000 d=[0.176045 2.02578]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.14478 1.94416]; mh=1 n=100
## r=2000 d=[0.173298 1.69455]; mh=1 n=100
## r=3000 d=[0.145677 1.86298]; mh=1 n=100
## r=4000 d=[0.15464 1.75576]; mh=1 n=100
## r=5000 d=[0.172731 2.48352]; mh=1 n=100
## Grow: 0%, 
## 
## 
## burn in:
## **GROW** @depth 0: [1,0.585859], n=(84,16)
## **PRUNE** @depth 0: [1,0.585859]
## r=1000 d=[0.1423 1.68091]; n=100
## r=2000 d=[0.122273 1.47355]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.204822 2.20474]; mh=1 n=100
## r=2000 d=[0.167745 1.54842]; mh=1 n=100
## r=3000 d=[0.166239 1.80224]; mh=1 n=100
## r=4000 d=[0.143251 2.20264]; mh=1 n=100
## r=5000 d=[0.148454 1.61592]; mh=1 n=100
## Grow: 0.2817%, Prune: 100%,

Experiment with all of the models - MODERATE noise

# Run the full model comparison with moderate observation noise
# (the recorded tgp sampler output follows below).
FitAndPlotAllModels(noise.sd = 0.04)
## 
## burn in:
## r=1000 d=[0]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0]; mh=1 n=100
## r=2000 d=[0]; mh=1 n=100
## r=3000 d=[0]; mh=1 n=100
## 
## 
## burn in:
## **GROW** @depth 0: [1,0.515152], n=(81,19)
## **PRUNE** @depth 0: [1,0.505051]
## **GROW** @depth 0: [1,0.494949], n=(80,20)
## **GROW** @depth 1: [1,0.212121], n=(25,55)
## **GROW** @depth 2: [2,0.494949], n=(38,19)
## **PRUNE** @depth 2: [2,0.494949]
## **GROW** @depth 2: [2,0.515152], n=(39,18)
## r=1000 d=[0] [0] [0] [0]; n=(25,40,16,19)
## r=2000 d=[0] [0] [0] [0]; n=(27,42,16,15)
## 
## Sampling @ nn=10000 pred locs:
## **GROW** @depth 1: [2,0.414141], n=(16,11)
## r=1000 d=[0] [0] [0] [0] [0]; mh=4 n=(16,13,46,13,12)
## r=2000 d=[0] [0] [0] [0] [0]; mh=4 n=(15,16,47,11,11)
## **GROW** @depth 3: [1,0.363636], n=(24,24)
## **PRUNE** @depth 3: [1,0.373316]
## r=3000 d=[0] [0] [0] [0] [0]; mh=4 n=(15,15,48,11,11)
## r=4000 d=[0] [0] [0] [0] [0]; mh=4 n=(18,12,48,11,11)
## **GROW** @depth 3: [1,0.363636], n=(23,24)
## r=5000 d=[0] [0] [0] [0] [0] [0]; mh=4 n=(18,12,23,24,12,11)
## Grow: 2.367%, Prune: 0.8333%, Change: 74.73%, Swap: 35.2%
## 
## 
## burn in:
## **GROW** @depth 0: [2,0.494949], n=(69,31)
## r=1000 d=[0] [0]; n=(79,21)
## r=2000 d=[0] [0]; n=(83,17)
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0] [0]; mh=2 n=(84,16)
## r=2000 d=[0] [0]; mh=2 n=(84,16)
## r=3000 d=[0] [0]; mh=2 n=(84,16)
## **GROW** @depth 1: [1,0.562613], n=(72,12)
## **PRUNE** @depth 1: [1,0.562613]
## **GROW** @depth 1: [1,0.59596], n=(73,11)
## **PRUNE** @depth 1: [1,0.596949]
## r=4000 d=[0] [0]; mh=2 n=(84,16)
## r=5000 d=[0] [0]; mh=2 n=(84,16)
## Grow: 0.8333%, Prune: 0.554%, Change: 68.75%, 
## 
## 
## burn in:
## r=1000 d=[0.113758 0.244005]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.16889 1.72085]; mh=1 n=100
## r=2000 d=[0.0726863 0.557486]; mh=1 n=100
## r=3000 d=[0.0856608 0.999713]; mh=1 n=100
## 
## 
## burn in:
## r=1000 d=[0.09503 0.67533]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.119088 0.739861]; mh=1 n=100
## r=2000 d=[0.141178 0.868005]; mh=1 n=100
## r=3000 d=[0.109814 0.467577]; mh=1 n=100
## 
## 
## burn in:
## r=1000 d=[0.113002 0.74874]; n=100
## r=2000 d=[0.203468 0.537353]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.0847842 1.35761]; mh=1 n=100
## r=2000 d=[0.180203 0.618149]; mh=1 n=100
## r=3000 d=[0.20623 0.880997]; mh=1 n=100
## r=4000 d=[0.153824 1.21729]; mh=1 n=100
## r=5000 d=[0.0936004 0.793942]; mh=1 n=100
## Grow: 0%, 
## 
## 
## burn in:
## r=1000 d=[0.154946 0.948123]; n=100
## r=2000 d=[0.125017 0.846324]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.0936033 0.998341]; mh=1 n=100
## r=2000 d=[0.170409 0.795872]; mh=1 n=100
## r=3000 d=[0.131837 0.991464]; mh=1 n=100
## r=4000 d=[0.135513 0.963878]; mh=1 n=100
## r=5000 d=[0.0976862 0.90773]; mh=1 n=100
## Grow: 0%,

Experiment with all of the models - STRONG noise

# Run the full model comparison with strong observation noise
# (the recorded tgp sampler output follows below).
FitAndPlotAllModels(noise.sd = 0.1)
## 
## burn in:
## r=1000 d=[0]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0]; mh=1 n=100
## r=2000 d=[0]; mh=1 n=100
## r=3000 d=[0]; mh=1 n=100
## 
## 
## burn in:
## **GROW** @depth 0: [2,0.353535], n=(43,57)
## **GROW** @depth 1: [1,0.515152], n=(48,11)
## **PRUNE** @depth 1: [1,0.494949]
## **GROW** @depth 1: [1,0.363636], n=(35,23)
## r=1000 d=[0] [0] [0]; n=(47,33,20)
## **PRUNE** @depth 1: [2,0.380578]
## **GROW** @depth 1: [1,0.173539], n=(15,46)
## **GROW** @depth 1: [2,0.515152], n=(22,14)
## **PRUNE** @depth 1: [2,0.515152]
## r=2000 d=[0] [0] [0]; n=(14,49,37)
## 
## Sampling @ nn=10000 pred locs:
## **GROW** @depth 1: [2,0.545455], n=(23,13)
## **PRUNE** @depth 2: [2,0.545455]
## **GROW** @depth 2: [1,0.59596], n=(27,11)
## **PRUNE** @depth 1: [1,0.569093]
## r=1000 d=[0] [0] [0]; mh=3 n=(12,51,37)
## **GROW** @depth 2: [2,0.206932], n=(12,40)
## **PRUNE** @depth 2: [2,0.206932]
## **GROW** @depth 1: [1,0.535354], n=(19,15)
## **PRUNE** @depth 2: [1,0.383838]
## r=2000 d=[0] [0] [0]; mh=3 n=(11,72,17)
## r=3000 d=[0] [0] [0]; mh=3 n=(12,67,21)
## **GROW** @depth 2: [2,0.545455], n=(55,13)
## **PRUNE** @depth 2: [2,0.545455]
## **GROW** @depth 2: [1,0.373737], n=(52,21)
## **PRUNE** @depth 2: [1,0.373737]
## r=4000 d=[0] [0] [0]; mh=3 n=(11,73,16)
## r=5000 d=[0] [0] [0]; mh=3 n=(11,73,16)
## Grow: 3.134%, Prune: 2.406%, Change: 71.2%, Swap: 74.93%
## 
## 
## burn in:
## **GROW** @depth 0: [2,0.505051], n=(64,36)
## **GROW** @depth 1: [2,0.707071], n=(21,13)
## **PRUNE** @depth 1: [2,0.515152]
## r=1000 d=[0] [0]; n=(87,13)
## **GROW** @depth 1: [2,0.393939], n=(49,39)
## **PRUNE** @depth 1: [2,0.393939]
## r=2000 d=[0] [0]; n=(88,12)
## 
## Sampling @ nn=10000 pred locs:
## **GROW** @depth 1: [2,0.424242], n=(55,33)
## **PRUNE** @depth 1: [2,0.424242]
## **GROW** @depth 1: [2,0.353535], n=(43,45)
## **PRUNE** @depth 1: [2,0.363636]
## r=1000 d=[0] [0]; mh=3 n=(88,12)
## r=2000 d=[0] [0]; mh=3 n=(88,12)
## **GROW** @depth 1: [2,0.30303], n=(32,56)
## **PRUNE** @depth 1: [2,0.30303]
## r=3000 d=[0] [0]; mh=3 n=(88,12)
## **GROW** @depth 1: [2,0.353535], n=(43,45)
## **PRUNE** @depth 1: [2,0.353535]
## r=4000 d=[0] [0]; mh=3 n=(87,13)
## r=5000 d=[0] [0]; mh=3 n=(88,12)
## Grow: 1.887%, Prune: 1.765%, Change: 64.86%, Swap: 100%
## 
## 
## burn in:
## r=1000 d=[0.0828484 1.11308]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.0660536 0.463871]; mh=1 n=100
## r=2000 d=[0.0935459 0.724467]; mh=1 n=100
## r=3000 d=[0.0697393 0.14968]; mh=1 n=100
## 
## 
## burn in:
## r=1000 d=[0.0998791 0.822005]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.0828332 0.510001]; mh=1 n=100
## r=2000 d=[0.0754856 0.707993]; mh=1 n=100
## r=3000 d=[0.182293 0.970864]; mh=1 n=100
## 
## 
## burn in:
## **GROW** @depth 0: [1,0.515152], n=(84,16)
## **PRUNE** @depth 0: [1,0.494949]
## r=1000 d=[0.0628612 0.458796]; n=100
## **GROW** @depth 0: [1,0.454545], n=(79,21)
## **PRUNE** @depth 0: [1,0.444444]
## **GROW** @depth 0: [1,0.484848], n=(83,17)
## **PRUNE** @depth 0: [1,0.484848]
## r=2000 d=[0.124144 0.717496]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.117989 0.649746]; mh=1 n=100
## r=2000 d=[0.0975024 1.4839]; mh=1 n=100
## r=3000 d=[0.0336089 0.621885]; mh=1 n=100
## **GROW** @depth 0: [1,0.424242], n=(75,25)
## **PRUNE** @depth 0: [1,0.424242]
## r=4000 d=[0.0882767 0.738484]; mh=1 n=100
## r=5000 d=[0.0298797 0.390827]; mh=1 n=100
## Grow: 1.09%, Prune: 100%, Change: 100%, 
## 
## 
## burn in:
## **GROW** @depth 0: [2,0.503382], n=(64,36)
## **PRUNE** @depth 0: [2,0.494949]
## r=1000 d=[0.122908 0.384497]; n=100
## r=2000 d=[0.135612 0.96919]; n=100
## 
## Sampling @ nn=10000 pred locs:
## r=1000 d=[0.0542049 0.94603]; mh=1 n=100
## r=2000 d=[0.0820045 0.0816785]; mh=1 n=100
## r=3000 d=[0.0656433 0.693151]; mh=1 n=100
## r=4000 d=[0.0447109 0.067903]; mh=1 n=100
## r=5000 d=[0.0438541 0.642483]; mh=1 n=100
## Grow: 0.277%, Prune: 100%, Change: 100%,